(in exact arithmetic, the harmonic series famously diverges to $\infty$)
Proof of divergence: http://www.free-education-resources.com/www.mathematik.net/harmonische-reihen/hr1s20.htm
Proof of convergence for floating point: http://www.maths.tcd.ie/pub/ims/bull71/recipnote.pdf
Resolution: http://fredrik-j.blogspot.de/2009/02/how-not-to-compute-harmonic-numbers.html
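The divergence is a statement about exact arithmetic; in floating point the partial sums eventually stop changing. A minimal sketch in single precision (float32), where the effect already shows up after a few million terms: once the running sum is large enough, each new term 1/n is smaller than half a unit in the last place of the sum and is rounded away.
In [ ]:
import numpy as np
# partial sums of 1 + 1/2 + 1/3 + ... in single precision
n = np.arange(1, 4 * 10**6, dtype=np.float32)
partial = np.cumsum(np.float32(1.0) / n)
print repr(partial[-1])                     # the value the float32 sum "converges" to
print repr(partial[-1] == partial[-10**5])  # True: the sum stopped growing long ago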
In floating-point arithmetic, subtraction is often inexact: decimal fractions such as 1.8 and 0.2 have no exact binary representation, so 2-1.8 is not exactly 0.2. However, Python hides this well-known phenomenon in its str method, which rounds the output to a convenient number. The following two lines illustrate not only the inaccuracy of numeric subtraction, but also the difference between repr and str: repr is designed to represent the value accurately, while str is intended for a convenient output format.
In [7]:
print repr(2-1.8)
print str(2-1.8)
For completeness: never compare floats with the exact equality operator:
In [20]:
print (2-1.8 == 0.2)
# Python hack that actually works surprisingly well:
print (str(2-1.8) == str(0.2))
# Recommended method, with control over the matching precision:
threshold = 1.0e-9
print (((2-1.8) - 0.2)**2 < threshold)   # compare the squared difference
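A fixed absolute threshold breaks down for values of large magnitude, where neighbouring doubles are spaced far apart. A minimal sketch of a relative-tolerance comparison (the helper name almost_equal is made up here; numpy.isclose implements the same idea):
In [ ]:
def almost_equal(x, y, rel_tol=1e-9):
    # scale the tolerance by the larger of the two magnitudes
    return abs(x - y) <= rel_tol * max(abs(x), abs(y))
x = 1.0e15
y = 1.0e15 + 1.0              # differs from x only in the last few bits
print (abs(x - y) < 1.0e-9)   # False: an absolute threshold is far too strict at this scale
print almost_equal(x, y)      # True: the values agree to about 15 significant digits

Cancellation also ruins textbook formulas. The next cell solves $ax^2 + bx + c = 0$ with $b \gg a, c$ in several ways: the naive pq formula (solve_pq), a rewritten pq formula that obtains the small root via Vieta's identity $r_1 r_2 = q$ and thereby avoids subtracting nearly equal numbers (solve_pq2), the eigenvalues of the companion matrix (solve_companion), Newton's method started from the companion roots, and finally numpy's polyroots. The printed residuals $f(r)$ show how much accuracy each variant loses.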
In [201]:
import numpy as np
import scipy.optimize as opt
a = 3.0
b = 10e5   # b >> a, c triggers cancellation in the naive formula
c = 5.0
pol = np.polynomial.Polynomial((c, b, a))
def f(x):
    return a*x**2 + b*x + c
    #return (a*x + b)*x + c   # Horner form
def f1(x):
    return 2*a*x + b
def f2(x):
    return 2*a
def solve_pq():
    # naive pq formula: r = -p/2 +/- sqrt((p/2)^2 - q)
    p = b/a
    q = c/a
    D = (p/2.0)**2 - q
    r1 = -p/2.0 + D**0.5   # cancellation: sqrt(D) is very close to p/2
    r2 = -p/2.0 - D**0.5
    return (r1, r2)
def solve_pq2():
    # stable variant: r1 = q/r2 (Vieta) avoids the cancelling subtraction
    p = b/a
    q = c/a
    D = (p/2.0)**2 - q
    r1 = -2.0*q/(p + 2.0*D**0.5)
    r2 = -p/2.0 - D**0.5
    return (r1, r2)
def solve_companion():
    # roots as eigenvalues of the companion matrix
    p = b/a
    q = c/a
    C = np.array([[0.0, -q], [1.0, -p]])
    return np.linalg.eigvals(C)
def solve_newton(r):
    # remaining defaults: fprime=None, args=(), maxiter=50, fprime2=None
    return opt.newton(f, r, tol=1.48e-10)
def solve_newton2(r):
    return opt.newton(f, r, tol=1.48e-10, fprime=f1)
def solve_newton3(r):
    return opt.newton(f, r, tol=1.48e-10, fprime=f1, fprime2=f2)
result = solve_pq()
print "pq"
print repr(result)
print repr(f(result[0]))
print repr(f(result[1]))
result = solve_pq2()
print "pq2"
print repr(result)
print repr(f(result[0]))
print repr(f(result[1]))
result = solve_companion()
print "companion"
print repr(result)
print repr(f(result[0]))
print repr(f(result[1]))
result[0] = solve_newton(result[0])
result[1] = solve_newton(result[1])
print "newton"
print repr(result)
print repr(f(result[0]))
print repr(f(result[1]))
result = np.polynomial.polynomial.polyroots((c, b, a))
print "numpy"
print repr(result)
print repr(f(result[1]))
print repr(f(result[0]))
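Overflow is the next trap. Below, n**4 on its own exceeds the largest double (about 1.8e308) and raises an OverflowError, even though the final quotient n**4/m**4 is of modest size. Rearranging to (n/m)**4, or working with logarithms, keeps every intermediate result representable.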
In [202]:
import math
n = 645645665476.43e160
m = 125624536575.76e150
#print repr(n**4/m**4)   # OverflowError: n**4 alone exceeds the double range
print repr((n/m)**4)
print repr(math.exp(4*math.log(n) - 4*math.log(m)))
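The logarithmic form generalizes well: products and powers of many large factors can be accumulated as sums of logarithms and exponentiated only once at the end, when the result is known to fit into a double.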
Why one should solve linear systems instead of forming the inverse: http://www.johndcook.com/blog/2010/01/19/dont-invert-that-matrix/
In [318]:
import numpy as np
import scipy.linalg
#m = np.array([[0.5e90, 0.00008, -0.1, 46786767], [-0.5, 0.2, -0.00001, 0.000008653], [1200000000000000.00002, -600.8, -0.5, 0.0], [-12000, 600.8, -0.698065, 650.0]])
m = np.array([[0.5, 0.00008, -0.1, 4667], [-0.5, 0.2, -0.00001, 0.000008653], [1200.00002, -600.8, -0.5, 0.0], [-12000, 600.8, -0.698065, 650.0]])
#print m
#mI = m**(-1)   # careful: this would be the elementwise reciprocal, not the matrix inverse
mI = np.linalg.inv(m)
#print mI
#print m.dot(mI)
ev = [1.0e-12, 2.0, 88.8, -0.005]
A = m.dot(np.diag(ev)).dot(mI)   # A has the prescribed eigenvalues ev; their huge spread makes A ill-conditioned
print A
print ""
AI = np.linalg.inv(A)
print AI
print ""
print A.dot(AI)
b = np.array([1.0, 2.0, 3.0, 4.0])
# We want x solving Ax = b
def err(x1):
    # residual norm ||A x1 - b||
    v = np.dot(A, x1) - b
    return np.sqrt(np.inner(v, v))
x = np.dot(AI, b)                 # via the explicit inverse
print err(x)
x2 = scipy.linalg.solve(A, b)     # via an LU factorization
print err(x2)
# A = QR
Q, R = np.linalg.qr(A)
Qb = np.dot(Q.T, b)
x3 = scipy.linalg.solve_triangular(R, Qb)   # via QR: solve R x = Q^T b
print err(x3)
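The error gap between the three solvers is explained by the conditioning of A (a quick check; by construction the singular-value spread is at least the eigenvalue ratio 88.8/1e-12, so a value of roughly 1e14 or more is expected):
In [ ]:
# condition number of A in the 2-norm; around 1e14 means that
# roughly 14 of the ~16 double-precision digits can be lost
print repr(np.linalg.cond(A))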